[XEN][POWERPC] catch up with xen-unstable changes
author Hollis Blanchard <hollisb@us.ibm.com>
Tue, 29 Aug 2006 22:53:57 +0000 (17:53 -0500)
committer Hollis Blanchard <hollisb@us.ibm.com>
Tue, 29 Aug 2006 22:53:57 +0000 (17:53 -0500)
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
xen/arch/powerpc/Rules.mk
xen/arch/powerpc/boot_of.c
xen/arch/powerpc/dom0_ops.c
xen/arch/powerpc/setup.c
xen/arch/powerpc/shadow.c
xen/include/asm-powerpc/shadow.h

index 592ebec0031eddbe91ee1dd57730054685d5091d..f395e88df5717cded146938bb5bbca3e3db8297b 100644 (file)
@@ -4,7 +4,7 @@ CC := $(CROSS_COMPILE)gcc
 LD := $(CROSS_COMPILE)ld
 
 # These are goodess that applies to all source.
-C_WARNINGS := -Wpointer-arith -Wredundant-decls
+C_WARNINGS := -Wredundant-decls
 
 # _no_ common code can have packed data structures or we are in touble.
 C_WARNINGS += -Wpacked
index 5841925e214a34275ec8c649a09155a92e838655..619dd2717b87a2a5f2178d8bf63b48f03fd2b0f9 100644 (file)
@@ -993,6 +993,7 @@ static int __init boot_of_cpus(void)
     /* We want a continuous logical cpu number space.  */
     cpu_set(0, cpu_present_map);
     cpu_set(0, cpu_online_map);
+    cpu_set(0, cpu_possible_map);
 
     /* Spin up all CPUS, even if there are more than NR_CPUS, because
      * Open Firmware has them spinning on cache lines which will
@@ -1039,8 +1040,10 @@ static int __init boot_of_cpus(void)
         } while (pong == ping);
         of_printf("pong = 0x%x\n", pong);
 
-        if (pong != ping)
+        if (pong != ping) {
             cpu_set(logical, cpu_present_map);
+            cpu_set(logical, cpu_possible_map);
+        }
 
         cpu = of_getpeer(cpu);
     }
index 9aa9118abfeceb7679d4633b4de84b5652f63fee..975e11d65d7db0b7cba847b9af0641e8733bd393 100644 (file)
 #include <public/domctl.h>
 #include <public/sysctl.h>
 
+void arch_getdomaininfo_ctxt(struct vcpu *, vcpu_guest_context_t *);
 void arch_getdomaininfo_ctxt(struct vcpu *v, vcpu_guest_context_t *c)
 { 
     memcpy(&c->user_regs, &v->arch.ctxt, sizeof(struct cpu_user_regs));
     /* XXX fill in rest of vcpu_guest_context_t */
 }
 
+long arch_do_domctl(struct xen_domctl *domctl,
+                    XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
 long arch_do_domctl(struct xen_domctl *domctl,
                     XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
 {
@@ -76,6 +79,19 @@ long arch_do_domctl(struct xen_domctl *domctl,
         }
     }
     break;
+    case XEN_DOMCTL_shadow_op:
+    {
+        struct domain *d;
+        ret = -ESRCH;
+        d = find_domain_by_id(domctl->domain);
+        if ( d != NULL )
+        {
+            ret = shadow_domctl(d, &domctl->u.shadow_op, u_domctl);
+            put_domain(d);
+            copy_to_guest(u_domctl, domctl, 1);
+        } 
+    }
+    break;
 
     default:
         ret = -ENOSYS;
@@ -85,6 +101,8 @@ long arch_do_domctl(struct xen_domctl *domctl,
     return ret;
 }
 
+long arch_do_sysctl(struct xen_sysctl *sysctl,
+                    XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl);
 long arch_do_sysctl(struct xen_sysctl *sysctl,
                     XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
 {
@@ -108,22 +126,9 @@ long arch_do_sysctl(struct xen_sysctl *sysctl,
             ret = -EFAULT;
     }
     break;
-    case DOM0_SHADOW_CONTROL:
-    {
-        struct domain *d;
-        ret = -ESRCH;
-        d = find_domain_by_id(op->u.shadow_control.domain);
-        if ( d != NULL )
-        {
-            ret = shadow_control_op(d, &op->u.shadow_control, u_dom0_op);
-            put_domain(d);
-            copy_to_guest(u_dom0_op, op, 1);
-        } 
-    }
-    break;
 
     default:
-        printk("%s: unsupported op: 0x%x\n", __func__, (op->cmd));
+        printk("%s: unsupported sysctl: 0x%x\n", __func__, (sysctl->cmd));
         ret = -ENOSYS;
         break;
     }
index 4ab5ec8ce17cb9ca5ecd81bf31e9c9d8d0522afd..9fd7fb3e839ee6ad8bce3c322abd40a0916ea09c 100644 (file)
@@ -74,6 +74,7 @@ ulong oftree_end;
 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
 cpumask_t cpu_online_map; /* missing ifdef in schedule.c */
 cpumask_t cpu_present_map;
+cpumask_t cpu_possible_map;
 
 /* XXX get this from ISA node in device tree */
 ulong isa_io_base;
@@ -254,6 +255,7 @@ static int kick_secondary_cpus(int maxcpus)
             break;
         init_parea(cpuid);
         cpu_set(cpuid, cpu_online_map);
+        cpu_set(cpuid, cpu_possible_map);
     }
 
     return 0;
index 235313625053910bbe94ed9722ee96d698ed531f..e311b53dfd1726d118441315a681e8259cf2c14a 100644 (file)
@@ -21,7 +21,6 @@
 #include <xen/config.h>
 #include <xen/types.h>
 #include <xen/shadow.h>
-#include <public/dom0_ops.h>
 
 static ulong htab_calc_sdr1(ulong htab_addr, ulong log_htab_size)
 {
@@ -116,9 +115,9 @@ unsigned int shadow_set_allocation(struct domain *d,
     return rc;
 }
 
-int shadow_control_op(struct domain *d, 
-                      dom0_shadow_control_t *sc,
-                      XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op)
+int shadow_domctl(struct domain *d, 
+                                 xen_domctl_shadow_op_t *sc,
+                                 XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
 {
     if ( unlikely(d == current->domain) )
     {
@@ -128,15 +127,15 @@ int shadow_control_op(struct domain *d,
 
     switch ( sc->op )
     {
-    case DOM0_SHADOW_CONTROL_OP_OFF:
+    case XEN_DOMCTL_SHADOW_OP_OFF:
          DPRINTK("Shadow is mandatory!\n");
          return -EINVAL;
 
-    case DOM0_SHADOW2_CONTROL_OP_GET_ALLOCATION:
+    case XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
         sc->mb = shadow_get_allocation(d);
         return 0;
 
-    case DOM0_SHADOW2_CONTROL_OP_SET_ALLOCATION: {
+    case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION: {
         int rc;
         int preempted = 0;
 
@@ -145,7 +144,7 @@ int shadow_control_op(struct domain *d,
         if (preempted)
             /* Not finished.  Set up to re-run the call. */
             rc = hypercall_create_continuation(
-                __HYPERVISOR_dom0_op, "h", u_dom0_op);
+                __HYPERVISOR_domctl, "h", u_domctl);
         else 
             /* Finished.  Return the new allocation */
             sc->mb = shadow_get_allocation(d);
index 643907d926aeaa347148252006965e1da69f8d07..d8b61ab32844bc18586035f63d08dac8157e2d80 100644 (file)
@@ -57,14 +57,14 @@ static inline void mark_dirty(struct domain *d, unsigned int mfn)
 }
 #define gnttab_mark_dirty(d, f) mark_dirty((d), (f))
 
-extern int shadow_control_op(struct domain *d, 
-                             dom0_shadow_control_t *sc,
-                             XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op);
+extern int shadow_domctl(struct domain *d, 
+                   xen_domctl_shadow_op_t *sc,
+                   XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
 extern unsigned int shadow_teardown(struct domain *d);
 extern unsigned int shadow_set_allocation(
     struct domain *d, unsigned int megabytes, int *preempted);
 
-/* Return the size of the shadow2 pool, rounded up to the nearest MB */
+/* Return the size of the shadow pool, rounded up to the nearest MB */
 static inline unsigned int shadow_get_allocation(struct domain *d)
 {
     return (1ULL << (d->arch.htab.order + PAGE_SHIFT)) >> 20;